3fbba6dbEVkVMX0JuDFzap9jeaucGA tools/xc/lib/xc_bvtsched.c
3fbba6dbasJQV-MVElDC0DGSHMiL5w tools/xc/lib/xc_domain.c
40278d99BLsfUv3qxv0I8C1sClZ0ow tools/xc/lib/xc_elf.h
+403e0977Bjsm_e82pwvl9VvaJxh8Gg tools/xc/lib/xc_evtchn.c
3fbba6dbNCU7U6nsMYiXzKkp3ztaJg tools/xc/lib/xc_linux_build.c
3fbba6dbl267zZOAVHYLOdLCdhcZMw tools/xc/lib/xc_linux_restore.c
3fbba6db7li3FJiABYtCmuGxOJxEGw tools/xc/lib/xc_linux_save.c
unsigned int max_vbds,
xc_vbd_t *vbds);
+#define DOMID_SELF (~1ULL)
+#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
+#define EVTCHNSTAT_disconnected 1 /* Channel is not connected to remote. */
+#define EVTCHNSTAT_connected 2 /* Channel is connected to remote. */
+int xc_evtchn_open(int xc_handle,
+ u64 dom1, /* may be DOMID_SELF */
+ u64 dom2,
+ int *port1,
+ int *port2);
+int xc_evtchn_close(int xc_handle,
+ u64 dom, /* may be DOMID_SELF */
+ int port);
+int xc_evtchn_send(int xc_handle,
+ int local_port);
+int xc_evtchn_status(int xc_handle,
+ u64 dom1, /* may be DOMID_SELF */
+ int port1,
+ u64 *dom2,
+ int *port2,
+ int *chn_status);
+
int xc_readconsolering(int xc_handle,
char *str,
unsigned int max_chars,
--- /dev/null
+/******************************************************************************
+ * xc_evtchn.c
+ *
+ * API for manipulating and accessing inter-domain event channels.
+ *
+ * Copyright (c) 2004, K A Fraser.
+ */
+
+#include "xc_private.h"
+
+/*
+ * Issue an event-channel operation hypercall to Xen. *op is both the
+ * input and the output buffer. Returns the hypercall result (negative
+ * on error, including failure to pin the buffer).
+ */
+static int do_evtchn_op(int xc_handle, evtchn_op_t *op)
+{
+ int ret = -1;
+ privcmd_hypercall_t hypercall;
+
+ hypercall.op = __HYPERVISOR_event_channel_op;
+ hypercall.arg[0] = (unsigned long)op;
+
+ /* Xen reads and writes *op in place, so the page must stay resident. */
+ if ( mlock(op, sizeof(*op)) != 0 )
+ {
+ PERROR("Could not lock memory for Xen hypercall");
+ goto out1;
+ }
+
+ if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
+ goto out2;
+
+ out2: (void)munlock(op, sizeof(*op));
+ out1: return ret;
+}
+
+/*
+ * Open an event channel between 'dom1' and 'dom2' ('dom1' may be
+ * DOMID_SELF). On success the newly-allocated port ids are written to
+ * *port1 and *port2; either pointer may be NULL if not wanted.
+ * Returns 0 on success, negative on failure.
+ */
+int xc_evtchn_open(int xc_handle,
+ u64 dom1,
+ u64 dom2,
+ int *port1,
+ int *port2)
+{
+ evtchn_op_t op;
+ int rc;
+
+ op.cmd = EVTCHNOP_open;
+ op.u.open.dom1 = (domid_t)dom1;
+ op.u.open.dom2 = (domid_t)dom2;
+
+ if ( (rc = do_evtchn_op(xc_handle, &op)) == 0 )
+ {
+ if ( port1 != NULL )
+ *port1 = op.u.open.port1;
+ if ( port2 != NULL )
+ *port2 = op.u.open.port2;
+ }
+
+ return rc;
+}
+
+
+/*
+ * Close the event channel with an endpoint at <dom, port> ('dom' may be
+ * DOMID_SELF). Returns 0 on success, negative on failure.
+ */
+int xc_evtchn_close(int xc_handle,
+ u64 dom,
+ int port)
+{
+ evtchn_op_t op;
+ op.cmd = EVTCHNOP_close;
+ op.u.close.dom = (domid_t)dom;
+ op.u.close.port = port;
+ return do_evtchn_op(xc_handle, &op);
+}
+
+
+/*
+ * Send an event on the calling domain's local endpoint 'local_port'.
+ * Returns 0 on success, negative on failure.
+ */
+int xc_evtchn_send(int xc_handle,
+ int local_port)
+{
+ evtchn_op_t op;
+ op.cmd = EVTCHNOP_send;
+ op.u.send.local_port = local_port;
+ return do_evtchn_op(xc_handle, &op);
+}
+
+
+/*
+ * Query the status of the channel endpoint <dom1, port1> ('dom1' may be
+ * DOMID_SELF). On success *chn_status receives an EVTCHNSTAT_* value and
+ * *dom2/*port2 receive the remote endpoint (meaningful only when the
+ * status is 'connected' — see the EVTCHNOP_status interface notes).
+ * Any result pointer may be NULL. Returns 0 on success, negative on
+ * failure.
+ */
+int xc_evtchn_status(int xc_handle,
+ u64 dom1,
+ int port1,
+ u64 *dom2,
+ int *port2,
+ int *chn_status)
+{
+ evtchn_op_t op;
+ int rc;
+
+ op.cmd = EVTCHNOP_status;
+ op.u.status.dom1 = (domid_t)dom1;
+ op.u.status.port1 = port1;
+
+ if ( (rc = do_evtchn_op(xc_handle, &op)) == 0 )
+ {
+ if ( dom2 != NULL )
+ *dom2 = (u64)op.u.status.dom2;
+ if ( port2 != NULL )
+ *port2 = op.u.status.port2;
+ if ( chn_status != NULL )
+ *chn_status = op.u.status.status;
+ }
+
+ return rc;
+}
#include <hypervisor-ifs/hypervisor-if.h>
#include <hypervisor-ifs/dom0_ops.h>
#include <hypervisor-ifs/vbd.h>
+#include <hypervisor-ifs/event_channel.h>
#define _PAGE_PRESENT 0x001
#define _PAGE_RW 0x002
return list;
}
+/*
+ * Python binding: xc.evtchn_open(dom2, [dom1]) -> {'port1':int,'port2':int}.
+ * 'dom1' defaults to DOMID_SELF. Returns an empty dict if the libxc call
+ * fails (matching the docstring in the method table).
+ */
+static PyObject *pyxc_evtchn_open(PyObject *self,
+ PyObject *args,
+ PyObject *kwds)
+{
+ XcObject *xc = (XcObject *)self;
+ PyObject *dict;
+
+ u64 dom1 = DOMID_SELF, dom2;
+ int port1, port2, ret;
+
+ /* 'dom2' is mandatory; 'dom1' is optional ("L|L" = two long longs). */
+ static char *kwd_list[] = { "dom2", "dom1", NULL };
+
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "L|L", kwd_list,
+ &dom2, &dom1) )
+ {
+ DPRINTF("could not parse parameter list.");
+ return NULL;
+ }
+
+ ret = xc_evtchn_open(xc->xc_handle, dom1, dom2, &port1, &port2);
+
+ if ( ret < 0 )
+ dict = Py_BuildValue("{}");
+ else
+ dict = Py_BuildValue("{s:i,s:i}",
+ "port1", port1,
+ "port2", port2);
+
+ return dict;
+}
+
+/*
+ * Python binding: xc.evtchn_close(port, [dom]) -> int.
+ * 'dom' defaults to DOMID_SELF. Returns the libxc result (0/-1) as a
+ * Python integer.
+ */
+static PyObject *pyxc_evtchn_close(PyObject *self,
+ PyObject *args,
+ PyObject *kwds)
+{
+ XcObject *xc = (XcObject *)self;
+
+ u64 dom = DOMID_SELF;
+ int port, ret;
+
+ static char *kwd_list[] = { "port", "dom", NULL };
+
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|L", kwd_list,
+ &port, &dom) )
+ {
+ DPRINTF("could not parse parameter list.");
+ return NULL;
+ }
+
+ ret = xc_evtchn_close(xc->xc_handle, dom, port);
+
+ return PyInt_FromLong(ret);
+}
+
+/*
+ * Python binding: xc.evtchn_send(port) -> int.
+ * Sends on a local endpoint; returns the libxc result (0/-1).
+ */
+static PyObject *pyxc_evtchn_send(PyObject *self,
+ PyObject *args,
+ PyObject *kwds)
+{
+ XcObject *xc = (XcObject *)self;
+
+ int port, ret;
+
+ static char *kwd_list[] = { "port", NULL };
+
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i", kwd_list, &port) )
+ {
+ DPRINTF("could not parse parameter list.");
+ return NULL;
+ }
+
+ ret = xc_evtchn_send(xc->xc_handle, port);
+
+ return PyInt_FromLong(ret);
+}
+
+/*
+ * Python binding: xc.evtchn_status(port, [dom]) -> dict.
+ * 'dom' defaults to DOMID_SELF. The dict always carries 'status'
+ * ('closed'/'disconnected'/'connected'); 'dom'/'port' of the remote end
+ * are included only when connected. Empty dict on failure.
+ */
+static PyObject *pyxc_evtchn_status(PyObject *self,
+ PyObject *args,
+ PyObject *kwds)
+{
+ XcObject *xc = (XcObject *)self;
+ PyObject *dict;
+
+ u64 dom1 = DOMID_SELF, dom2;
+ int port1, port2, status, ret;
+
+ static char *kwd_list[] = { "port", "dom", NULL };
+
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|L", kwd_list,
+ &port1, &dom1) )
+ {
+ DPRINTF("could not parse parameter list.");
+ return NULL;
+ }
+
+ ret = xc_evtchn_status(xc->xc_handle, dom1, port1, &dom2, &port2, &status);
+
+ if ( ret < 0 )
+ {
+ dict = Py_BuildValue("{}");
+ }
+ else
+ {
+ /* dom2/port2 are only defined by Xen in the 'connected' case. */
+ switch ( status )
+ {
+ case EVTCHNSTAT_closed:
+ dict = Py_BuildValue("{s:s}",
+ "status", "closed");
+ break;
+ case EVTCHNSTAT_disconnected:
+ dict = Py_BuildValue("{s:s}",
+ "status", "disconnected");
+ break;
+ case EVTCHNSTAT_connected:
+ dict = Py_BuildValue("{s:s,s:L,s:i}",
+ "status", "connected",
+ "dom", dom2,
+ "port", port2);
+ break;
+ default:
+ dict = Py_BuildValue("{}");
+ break;
+ }
+ }
+
+ return dict;
+}
+
static PyObject *pyxc_readconsolering(PyObject *self,
PyObject *args,
PyObject *kwds)
" writeable [int]: Bool - is this VBD writeable?\n"
" nr_sectors [long]: Size of this VBD, in 512-byte sectors.\n" },
+ { "evtchn_open",
+ (PyCFunction)pyxc_evtchn_open,
+ METH_VARARGS | METH_KEYWORDS, "\n"
+ "Open an event channel between two domains.\n"
+ " dom1 [long, SELF]: First domain to be connected.\n"
+ " dom2 [long]: Second domain to be connected.\n\n"
+ "Returns: [dict] dictionary is empty on failure.\n"
+ " port1 [int]: Port-id for endpoint at dom1.\n"
+ " port2 [int]: Port-id for endpoint at dom2.\n" },
+
+ { "evtchn_close",
+ (PyCFunction)pyxc_evtchn_close,
+ METH_VARARGS | METH_KEYWORDS, "\n"
+ "Close an event channel.\n"
+ " dom [long, SELF]: Dom-id of one endpoint of the channel.\n"
+ " port [int]: Port-id of one endpoint of the channel.\n\n"
+ "Returns: [int] 0 on success; -1 on error.\n" },
+
+ { "evtchn_send",
+ (PyCFunction)pyxc_evtchn_send,
+ METH_VARARGS | METH_KEYWORDS, "\n"
+ "Send an event along a locally-connected event channel.\n"
+ " port [int]: Port-id of a local channel endpoint.\n\n"
+ "Returns: [int] 0 on success; -1 on error.\n" },
+
+ { "evtchn_status",
+ (PyCFunction)pyxc_evtchn_status,
+ METH_VARARGS | METH_KEYWORDS, "\n"
+ "Query the status of an event channel.\n"
+ " dom [long, SELF]: Dom-id of one endpoint of the channel.\n"
+ " port [int]: Port-id of one endpoint of the channel.\n\n"
+ "Returns: [dict] dictionary is empty on failure.\n"
+ " status [str]: 'closed', 'disconnected', or 'connected'.\n"
+ "The following are also returned if 'status' is 'connected':\n"
+ " dom [long]: Dom-id of remote endpoint.\n"
+ " port [int]: Port-id of remote endpoint.\n" },
+
{ "readconsolering",
(PyCFunction)pyxc_readconsolering,
METH_VARARGS | METH_KEYWORDS, "\n"
static long event_channel_open(evtchn_open_t *open)
{
- struct task_struct *lp, *rp;
- int lport = 0, rport = 0;
+ struct task_struct *p1, *p2;
+ int port1 = 0, port2 = 0;
unsigned long cpu_mask;
- domid_t ldom = open->local_dom, rdom = open->remote_dom;
+ domid_t dom1 = open->dom1, dom2 = open->dom2;
long rc = 0;
if ( !IS_PRIV(current) )
return -EPERM;
- /* 'local_dom' may be DOMID_SELF. 'remote_dom' cannot be.*/
- if ( ldom == DOMID_SELF )
- ldom = current->domain;
+ /* 'dom1' may be DOMID_SELF; 'dom2' must name an explicit domain. */
+ if ( dom1 == DOMID_SELF )
+ dom1 = current->domain;
+ if ( dom2 == DOMID_SELF )
+ return -EINVAL;
/* Event channel must connect distinct domains. */
- if ( ldom == rdom )
+ if ( dom1 == dom2 )
return -EINVAL;
- if ( ((lp = find_domain_by_id(ldom)) == NULL) ||
- ((rp = find_domain_by_id(rdom)) == NULL) )
+ if ( ((p1 = find_domain_by_id(dom1)) == NULL) ||
+ ((p2 = find_domain_by_id(dom2)) == NULL) )
{
- if ( lp != NULL )
- put_task_struct(lp);
+ if ( p1 != NULL )
+ put_task_struct(p1);
return -ESRCH;
}
/* Avoid deadlock by first acquiring lock of domain with smaller id. */
- if ( ldom < rdom )
+ if ( dom1 < dom2 )
{
- spin_lock(&lp->event_channel_lock);
- spin_lock(&rp->event_channel_lock);
+ spin_lock(&p1->event_channel_lock);
+ spin_lock(&p2->event_channel_lock);
}
else
{
- spin_lock(&rp->event_channel_lock);
- spin_lock(&lp->event_channel_lock);
+ spin_lock(&p2->event_channel_lock);
+ spin_lock(&p1->event_channel_lock);
}
- if ( (lport = get_free_port(lp)) < 0 )
+ if ( (port1 = get_free_port(p1)) < 0 )
{
- rc = lport;
+ rc = port1;
goto out;
}
- if ( (rport = get_free_port(rp)) < 0 )
+ if ( (port2 = get_free_port(p2)) < 0 )
{
- rc = rport;
+ rc = port2;
goto out;
}
- lp->event_channel[lport].remote_dom = rp;
- lp->event_channel[lport].remote_port = (u16)rport;
- lp->event_channel[lport].state = ECS_CONNECTED;
+ p1->event_channel[port1].remote_dom = p2;
+ p1->event_channel[port1].remote_port = (u16)port2;
+ p1->event_channel[port1].state = ECS_CONNECTED;
- rp->event_channel[rport].remote_dom = lp;
- rp->event_channel[rport].remote_port = (u16)lport;
- rp->event_channel[rport].state = ECS_CONNECTED;
+ p2->event_channel[port2].remote_dom = p1;
+ p2->event_channel[port2].remote_port = (u16)port1;
+ p2->event_channel[port2].state = ECS_CONNECTED;
- cpu_mask = set_event_pending(lp, lport);
- cpu_mask |= set_event_pending(rp, rport);
+ /* Mark both new ports pending so each endpoint notices the connect. */
+ cpu_mask = set_event_pending(p1, port1);
+ cpu_mask |= set_event_pending(p2, port2);
guest_event_notify(cpu_mask);
out:
- spin_unlock(&lp->event_channel_lock);
- spin_unlock(&rp->event_channel_lock);
+ spin_unlock(&p1->event_channel_lock);
+ spin_unlock(&p2->event_channel_lock);
- put_task_struct(lp);
- put_task_struct(rp);
+ put_task_struct(p1);
+ put_task_struct(p2);
- open->local_port = lport;
- open->remote_port = rport;
+ open->port1 = port1;
+ open->port2 = port2;
return rc;
}
-static long __event_channel_close(struct task_struct *lp, int lport)
+static long __event_channel_close(struct task_struct *p1, int port1)
{
- struct task_struct *rp = NULL;
- event_channel_t *lchn, *rchn;
- int rport;
+ struct task_struct *p2 = NULL;
+ event_channel_t *chn1, *chn2;
+ int port2;
unsigned long cpu_mask;
long rc = 0;
again:
- spin_lock(&lp->event_channel_lock);
+ spin_lock(&p1->event_channel_lock);
- lchn = lp->event_channel;
+ chn1 = p1->event_channel;
- if ( (lport < 0) || (lport >= lp->max_event_channel) ||
- (lchn[lport].state == ECS_FREE) )
+ if ( (port1 < 0) || (port1 >= p1->max_event_channel) ||
+ (chn1[port1].state == ECS_FREE) )
{
rc = -EINVAL;
goto out;
}
- if ( lchn[lport].state == ECS_CONNECTED )
+ if ( chn1[port1].state == ECS_CONNECTED )
{
- if ( rp == NULL )
+ if ( p2 == NULL )
{
- rp = lchn[lport].remote_dom;
- get_task_struct(rp);
+ p2 = chn1[port1].remote_dom;
+ get_task_struct(p2);
- if ( lp->domain < rp->domain )
+ if ( p1->domain < p2->domain )
{
- spin_lock(&rp->event_channel_lock);
+ spin_lock(&p2->event_channel_lock);
}
else
{
- spin_unlock(&lp->event_channel_lock);
- spin_lock(&rp->event_channel_lock);
+ /* Re-take locks in ascending domain-id order, then restart. */
+ spin_unlock(&p1->event_channel_lock);
+ spin_lock(&p2->event_channel_lock);
goto again;
}
}
- else if ( rp != lchn[lport].remote_dom )
+ else if ( p2 != chn1[port1].remote_dom )
{
rc = -EINVAL;
goto out;
}
- rchn = rp->event_channel;
- rport = lchn[lport].remote_port;
+ chn2 = p2->event_channel;
+ port2 = chn1[port1].remote_port;
- if ( rport >= rp->max_event_channel )
+ if ( port2 >= p2->max_event_channel )
BUG();
- if ( rchn[rport].state != ECS_CONNECTED )
+ if ( chn2[port2].state != ECS_CONNECTED )
BUG();
- if ( rchn[rport].remote_dom != lp )
+ if ( chn2[port2].remote_dom != p1 )
BUG();
- rchn[rport].state = ECS_ZOMBIE;
- rchn[rport].remote_dom = NULL;
- rchn[rport].remote_port = 0xFFFF;
+ chn2[port2].state = ECS_ZOMBIE;
+ chn2[port2].remote_dom = NULL;
+ chn2[port2].remote_port = 0xFFFF;
- cpu_mask = set_event_disc(lp, lport);
- cpu_mask |= set_event_disc(rp, rport);
+ cpu_mask = set_event_disc(p1, port1);
+ cpu_mask |= set_event_disc(p2, port2);
guest_event_notify(cpu_mask);
}
- lchn[lport].state = ECS_FREE;
- lchn[lport].remote_dom = NULL;
- lchn[lport].remote_port = 0xFFFF;
+ chn1[port1].state = ECS_FREE;
+ chn1[port1].remote_dom = NULL;
+ chn1[port1].remote_port = 0xFFFF;
out:
- spin_unlock(&lp->event_channel_lock);
- put_task_struct(lp);
+ spin_unlock(&p1->event_channel_lock);
+ put_task_struct(p1);
- if ( rp != NULL )
+ if ( p2 != NULL )
{
- spin_unlock(&rp->event_channel_lock);
- put_task_struct(rp);
+ spin_unlock(&p2->event_channel_lock);
+ put_task_struct(p2);
}
return rc;
static long event_channel_close(evtchn_close_t *close)
{
- struct task_struct *lp;
- int lport = close->local_port;
+ struct task_struct *p;
long rc;
- domid_t ldom = close->local_dom;
+ domid_t dom = close->dom;
- if ( ldom == DOMID_SELF )
- ldom = current->domain;
+ /* 'dom' may be DOMID_SELF; otherwise the caller must be privileged. */
+ if ( dom == DOMID_SELF )
+ dom = current->domain;
else if ( !IS_PRIV(current) )
return -EPERM;
- if ( (lp = find_domain_by_id(ldom)) == NULL )
+ if ( (p = find_domain_by_id(dom)) == NULL )
return -ESRCH;
- rc = __event_channel_close(lp, lport);
+ rc = __event_channel_close(p, close->port);
- put_task_struct(lp);
+ put_task_struct(p);
return rc;
}
static long event_channel_status(evtchn_status_t *status)
{
- struct task_struct *lp;
- domid_t ldom = status->local_dom;
- int lport = status->local_port;
- event_channel_t *lchn;
+ struct task_struct *p;
+ domid_t dom = status->dom1;
+ int port = status->port1;
+ event_channel_t *chn;
- if ( ldom == DOMID_SELF )
- ldom = current->domain;
+ /* 'dom' may be DOMID_SELF; otherwise the caller must be privileged. */
+ if ( dom == DOMID_SELF )
+ dom = current->domain;
else if ( !IS_PRIV(current) )
return -EPERM;
- if ( (lp = find_domain_by_id(ldom)) == NULL )
+ if ( (p = find_domain_by_id(dom)) == NULL )
return -ESRCH;
- spin_lock(&lp->event_channel_lock);
+ spin_lock(&p->event_channel_lock);
- lchn = lp->event_channel;
+ chn = p->event_channel;
- if ( (lport < 0) || (lport >= lp->max_event_channel) )
+ if ( (port < 0) || (port >= p->max_event_channel) )
{
- spin_unlock(&lp->event_channel_lock);
+ spin_unlock(&p->event_channel_lock);
return -EINVAL;
}
- switch ( lchn[lport].state )
+ switch ( chn[port].state )
{
case ECS_FREE:
status->status = EVTCHNSTAT_closed;
break;
case ECS_CONNECTED:
status->status = EVTCHNSTAT_connected;
- status->remote_dom = lchn[lport].remote_dom->domain;
- status->remote_port = lchn[lport].remote_port;
+ status->dom2 = chn[port].remote_dom->domain;
+ status->port2 = chn[port].remote_port;
break;
default:
BUG();
}
- spin_unlock(&lp->event_channel_lock);
+ spin_unlock(&p->event_channel_lock);
return 0;
}
#define __HYPERVISOR_IFS__EVENT_CHANNEL_H__
/*
- * EVTCHNOP_open: Open a communication channel between <local_dom> and
- * <remote_dom>.
+ * EVTCHNOP_open: Open a communication channel between <dom1> and <dom2>.
* NOTES:
- * 1. <local_dom> may be specified as DOMID_SELF.
+ * 1. <dom1> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may create an event channel.
- * 3. <local_port> and <remote_port> are only supplied if the op succeeds.
+ * 3. <port1> and <port2> are only supplied if the op succeeds.
*/
#define EVTCHNOP_open 0
typedef struct evtchn_open
{
/* IN parameters. */
- domid_t local_dom, remote_dom;
+ domid_t dom1, dom2;
/* OUT parameters. */
- int local_port, remote_port;
+ int port1, port2;
} evtchn_open_t;
/*
* EVTCHNOP_close: Close the communication channel which has an endpoint at
- * <local_dom, local_port>.
+ * <dom, port>.
* NOTES:
- * 1. <local_dom> may be specified as DOMID_SELF.
+ * 1. <dom> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may close an event channel
- * for which <local_dom> is not DOMID_SELF.
+ * for which <dom> is not DOMID_SELF.
*/
#define EVTCHNOP_close 1
typedef struct evtchn_close
{
/* IN parameters. */
- domid_t local_dom;
- int local_port;
+ domid_t dom;
+ int port;
/* No OUT parameters. */
} evtchn_close_t;
/*
* EVTCHNOP_status: Get the current status of the communication channel which
- * has an endpoint at <local_dom, local_port>.
+ * has an endpoint at <dom1, port1>.
* NOTES:
- * 1. <local_dom> may be specified as DOMID_SELF.
+ * 1. <dom1> may be specified as DOMID_SELF.
* 2. Only a sufficiently-privileged domain may obtain the status of an event
- * channel for which <local_dom> is not DOMID_SELF.
- * 3. <remote_dom, remote_port> is only supplied if status is 'connected'.
+ * channel for which <dom1> is not DOMID_SELF.
+ * 3. <dom2, port2> is only supplied if status is 'connected'.
*/
#define EVTCHNOP_status 3 /* Get status of <channel id>. */
typedef struct evtchn_status
{
/* IN parameters */
- domid_t local_dom;
- int local_port;
+ domid_t dom1;
+ int port1;
/* OUT parameters */
- domid_t remote_dom;
- int remote_port;
+ domid_t dom2;
+ int port2;
#define EVTCHNSTAT_closed 0 /* Chennel is not in use. */
#define EVTCHNSTAT_disconnected 1 /* Channel is not connected to remote. */
#define EVTCHNSTAT_connected 2 /* Channel is connected to remote. */